static int do_block_io_op(blkif_t *blkif)
{
- blkif_back_ring_t *blk_ring = &blkif->blk_ring;
+ blkif_back_rings_t *blk_rings = &blkif->blk_rings;
blkif_request_t req;
pending_req_t *pending_req;
RING_IDX rc, rp;
int more_to_do = 0;
- rc = blk_ring->req_cons;
- rp = blk_ring->sring->req_prod;
+ rc = blk_rings->common.req_cons;
+ rp = blk_rings->common.sring->req_prod;
rmb(); /* Ensure we see queued requests up to 'rp'. */
- while ((rc != rp) && !RING_REQUEST_CONS_OVERFLOW(blk_ring, rc)) {
+ while (rc != rp) {
+
+ if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
+ break;
pending_req = alloc_req();
if (NULL == pending_req) {
break;
}
- memcpy(&req, RING_GET_REQUEST(blk_ring, rc), sizeof(req));
- blk_ring->req_cons = ++rc; /* before make_response() */
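+ /* Copy the request out of the shared ring, translating any
+  * foreign (32-on-64) layout into a native blkif_request_t. */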
+ switch (blkif->blk_protocol) {
+ case BLKIF_PROTOCOL_NATIVE:
+ memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc),
+        sizeof(req));
+ break;
+ case BLKIF_PROTOCOL_X86_32:
+ blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc));
+ break;
+ case BLKIF_PROTOCOL_X86_64:
+ blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc));
+ break;
+ default:
+ BUG();
+ }
+ blk_rings->common.req_cons = ++rc; /* before make_response() */
switch (req.operation) {
case BLKIF_OP_READ:
static void make_response(blkif_t *blkif, unsigned long id,
unsigned short op, int st)
{
- blkif_response_t *resp;
+ blkif_response_t resp;
unsigned long flags;
- blkif_back_ring_t *blk_ring = &blkif->blk_ring;
+ blkif_back_rings_t *blk_rings = &blkif->blk_rings;
int more_to_do = 0;
int notify;
- spin_lock_irqsave(&blkif->blk_ring_lock, flags);
-
- /* Place on the response ring for the relevant domain. */
- resp = RING_GET_RESPONSE(blk_ring, blk_ring->rsp_prod_pvt);
- resp->id = id;
- resp->operation = op;
- resp->status = st;
- blk_ring->rsp_prod_pvt++;
- RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(blk_ring, notify);
- if (blk_ring->rsp_prod_pvt == blk_ring->req_cons) {
+ resp.id = id;
+ resp.operation = op;
+ resp.status = st;
+
+ spin_lock_irqsave(&blkif->blk_ring_lock, flags);
+ /* Place on the response ring for the relevant domain. */
+ switch (blkif->blk_protocol) {
+ case BLKIF_PROTOCOL_NATIVE:
+ memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt),
+ &resp, sizeof(resp));
+ break;
+ case BLKIF_PROTOCOL_X86_32:
+ memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt),
+ &resp, sizeof(resp));
+ break;
+ case BLKIF_PROTOCOL_X86_64:
+ memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt),
+ &resp, sizeof(resp));
+ break;
+ default:
+ BUG();
+ }
+ blk_rings->common.rsp_prod_pvt++;
+ RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
+ if (blk_rings->common.rsp_prod_pvt == blk_rings->common.req_cons) {
/*
* Tail check for pending requests. Allows frontend to avoid
* notifications if requests are already in flight (lower
* overheads and promotes batching).
*/
- RING_FINAL_CHECK_FOR_REQUESTS(blk_ring, more_to_do);
+ RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);
- } else if (RING_HAS_UNCONSUMED_REQUESTS(blk_ring)) {
+ } else if (RING_HAS_UNCONSUMED_REQUESTS(&blk_rings->common)) {
more_to_do = 1;
-
}
+
spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
if (more_to_do)
#include <asm/pgalloc.h>
#include <xen/evtchn.h>
#include <asm/hypervisor.h>
-#include <xen/interface/io/blkif.h>
-#include <xen/interface/io/ring.h>
+#include <xen/blkif.h>
#include <xen/gnttab.h>
#include <xen/driver_util.h>
#include <xen/xenbus.h>
/* Physical parameters of the comms window. */
unsigned int irq;
/* Comms information. */
- blkif_back_ring_t blk_ring;
+ enum blkif_protocol blk_protocol;
+ blkif_back_rings_t blk_rings;
struct vm_struct *blk_ring_area;
/* The VBD attached to this interface. */
struct vbd vbd;
int blkif_map(blkif_t *blkif, unsigned long shared_page, unsigned int evtchn)
{
- blkif_sring_t *sring;
int err;
/* Already connected through? */
return err;
}
- sring = (blkif_sring_t *)blkif->blk_ring_area->addr;
- BACK_RING_INIT(&blkif->blk_ring, sring, PAGE_SIZE);
+ switch (blkif->blk_protocol) {
+ case BLKIF_PROTOCOL_NATIVE:
+ {
+ blkif_sring_t *sring;
+ sring = (blkif_sring_t *)blkif->blk_ring_area->addr;
+ BACK_RING_INIT(&blkif->blk_rings.native, sring, PAGE_SIZE);
+ break;
+ }
+ case BLKIF_PROTOCOL_X86_32:
+ {
+ blkif_x86_32_sring_t *sring_x86_32;
+ sring_x86_32 = (blkif_x86_32_sring_t *)blkif->blk_ring_area->addr;
+ BACK_RING_INIT(&blkif->blk_rings.x86_32, sring_x86_32, PAGE_SIZE);
+ break;
+ }
+ case BLKIF_PROTOCOL_X86_64:
+ {
+ blkif_x86_64_sring_t *sring_x86_64;
+ sring_x86_64 = (blkif_x86_64_sring_t *)blkif->blk_ring_area->addr;
+ BACK_RING_INIT(&blkif->blk_rings.x86_64, sring_x86_64, PAGE_SIZE);
+ break;
+ }
+ default:
+ BUG();
+ }
err = bind_interdomain_evtchn_to_irqhandler(
blkif->domid, evtchn, blkif_be_int, 0, "blkif-backend", blkif);
blkif->irq = 0;
}
- if (blkif->blk_ring.sring) {
+ if (blkif->blk_rings.common.sring) {
unmap_frontend_page(blkif);
free_vm_area(blkif->blk_ring_area);
- blkif->blk_ring.sring = NULL;
+ blkif->blk_rings.common.sring = NULL;
}
}
struct xenbus_device *dev = be->dev;
unsigned long ring_ref;
unsigned int evtchn;
+ char protocol[64] = "";
int err;
DPRINTK("%s", dev->otherend);
return err;
}
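+ /* Default to the native ABI; the frontend may request another
+  * via its xenstore "protocol" node. */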
+ be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
+ err = xenbus_gather(XBT_NIL, dev->otherend, "protocol",
+ "%63s", protocol, NULL);
+ if (err)
+ strcpy(protocol, "unspecified, assuming native");
+ else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_NATIVE))
+ be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
+ else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_32))
+ be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_32;
+ else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_64))
+ be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_64;
+ else {
+ xenbus_dev_fatal(dev, -1, "unknown fe protocol %s", protocol);
+ return -1;
+ }
+ printk("blkback: ring-ref %ld, event-channel %d, protocol %d (%s)\n",
+ ring_ref, evtchn, be->blkif->blk_protocol, protocol);
+
/* Map the shared frame, irq etc. */
err = blkif_map(be->blkif, ring_ref, evtchn);
if (err) {
static int print_dbug = 1;
static int do_block_io_op(blkif_t *blkif)
{
- blkif_back_ring_t *blk_ring = &blkif->blk_ring;
+ blkif_back_rings_t *blk_rings = &blkif->blk_rings;
blkif_request_t req;
pending_req_t *pending_req;
RING_IDX rc, rp;
int more_to_do = 0;
tap_blkif_t *info;
- rc = blk_ring->req_cons;
- rp = blk_ring->sring->req_prod;
+ rc = blk_rings->common.req_cons;
+ rp = blk_rings->common.sring->req_prod;
rmb(); /* Ensure we see queued requests up to 'rp'. */
/*Check blkif has corresponding UE ring*/
more_to_do = 1;
break;
}
-
- if (RING_REQUEST_CONS_OVERFLOW(blk_ring, rc)) {
+
+ if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc)) {
WPRINTK("RING_REQUEST_CONS_OVERFLOW!"
" More to do\n");
more_to_do = 1;
break;
}
- memcpy(&req, RING_GET_REQUEST(blk_ring, rc), sizeof(req));
- blk_ring->req_cons = ++rc; /* before make_response() */
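+ /* Copy the request out of the shared ring, translating any
+  * foreign (32-on-64) layout into a native blkif_request_t. */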
+ switch (blkif->blk_protocol) {
+ case BLKIF_PROTOCOL_NATIVE:
+ memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc),
+ sizeof(req));
+ break;
+ case BLKIF_PROTOCOL_X86_32:
+ blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc));
+ break;
+ case BLKIF_PROTOCOL_X86_64:
+ blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc));
+ break;
+ default:
+ BUG();
+ }
+ blk_rings->common.req_cons = ++rc; /* before make_response() */
switch (req.operation) {
case BLKIF_OP_READ:
WPRINTK("blktap: fe_ring is full, can't add "
"IO Request will be dropped. %d %d\n",
RING_SIZE(&info->ufe_ring),
- RING_SIZE(&blkif->blk_ring));
+ RING_SIZE(&blkif->blk_rings.common));
goto fail_response;
}
static void make_response(blkif_t *blkif, unsigned long id,
unsigned short op, int st)
{
- blkif_response_t *resp;
+ blkif_response_t resp;
unsigned long flags;
- blkif_back_ring_t *blk_ring = &blkif->blk_ring;
+ blkif_back_rings_t *blk_rings = &blkif->blk_rings;
int more_to_do = 0;
int notify;
+ resp.id = id;
+ resp.operation = op;
+ resp.status = st;
+
spin_lock_irqsave(&blkif->blk_ring_lock, flags);
- /* Place on the response ring for the relevant domain. */
- resp = RING_GET_RESPONSE(blk_ring, blk_ring->rsp_prod_pvt);
- resp->id = id;
- resp->operation = op;
- resp->status = st;
- blk_ring->rsp_prod_pvt++;
- RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(blk_ring, notify);
-
- if (blk_ring->rsp_prod_pvt == blk_ring->req_cons) {
+ /* Place on the response ring for the relevant domain. */
+ switch (blkif->blk_protocol) {
+ case BLKIF_PROTOCOL_NATIVE:
+ memcpy(RING_GET_RESPONSE(&blk_rings->native,
+ blk_rings->native.rsp_prod_pvt),
+ &resp, sizeof(resp));
+ break;
+ case BLKIF_PROTOCOL_X86_32:
+ memcpy(RING_GET_RESPONSE(&blk_rings->x86_32,
+ blk_rings->x86_32.rsp_prod_pvt),
+ &resp, sizeof(resp));
+ break;
+ case BLKIF_PROTOCOL_X86_64:
+ memcpy(RING_GET_RESPONSE(&blk_rings->x86_64,
+ blk_rings->x86_64.rsp_prod_pvt),
+ &resp, sizeof(resp));
+ break;
+ default:
+ BUG();
+ }
+ blk_rings->common.rsp_prod_pvt++;
+ RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
+
+ if (blk_rings->common.rsp_prod_pvt == blk_rings->common.req_cons) {
/*
* Tail check for pending requests. Allows frontend to avoid
* notifications if requests are already in flight (lower
* overheads and promotes batching).
*/
- RING_FINAL_CHECK_FOR_REQUESTS(blk_ring, more_to_do);
- } else if (RING_HAS_UNCONSUMED_REQUESTS(blk_ring)) {
+ RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);
+ } else if (RING_HAS_UNCONSUMED_REQUESTS(&blk_rings->common)) {
more_to_do = 1;
- }
+ }
spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
if (more_to_do)
blkif_notify_work(blkif);
#include <asm/pgalloc.h>
#include <xen/evtchn.h>
#include <asm/hypervisor.h>
-#include <xen/interface/io/blkif.h>
-#include <xen/interface/io/ring.h>
+#include <xen/blkif.h>
#include <xen/gnttab.h>
#include <xen/driver_util.h>
/* Physical parameters of the comms window. */
unsigned int irq;
/* Comms information. */
- blkif_back_ring_t blk_ring;
+ enum blkif_protocol blk_protocol;
+ blkif_back_rings_t blk_rings;
struct vm_struct *blk_ring_area;
/* Back pointer to the backend_info. */
struct backend_info *be;
int tap_blkif_map(blkif_t *blkif, unsigned long shared_page,
unsigned int evtchn)
{
- blkif_sring_t *sring;
int err;
/* Already connected through? */
return err;
}
- sring = (blkif_sring_t *)blkif->blk_ring_area->addr;
- BACK_RING_INIT(&blkif->blk_ring, sring, PAGE_SIZE);
+ switch (blkif->blk_protocol) {
+ case BLKIF_PROTOCOL_NATIVE:
+ {
+ blkif_sring_t *sring;
+ sring = (blkif_sring_t *)blkif->blk_ring_area->addr;
+ BACK_RING_INIT(&blkif->blk_rings.native, sring, PAGE_SIZE);
+ break;
+ }
+ case BLKIF_PROTOCOL_X86_32:
+ {
+ blkif_x86_32_sring_t *sring_x86_32;
+ sring_x86_32 = (blkif_x86_32_sring_t *)blkif->blk_ring_area->addr;
+ BACK_RING_INIT(&blkif->blk_rings.x86_32, sring_x86_32, PAGE_SIZE);
+ break;
+ }
+ case BLKIF_PROTOCOL_X86_64:
+ {
+ blkif_x86_64_sring_t *sring_x86_64;
+ sring_x86_64 = (blkif_x86_64_sring_t *)blkif->blk_ring_area->addr;
+ BACK_RING_INIT(&blkif->blk_rings.x86_64, sring_x86_64, PAGE_SIZE);
+ break;
+ }
+ default:
+ BUG();
+ }
err = bind_interdomain_evtchn_to_irqhandler(
blkif->domid, evtchn, tap_blkif_be_int,
unbind_from_irqhandler(blkif->irq, blkif);
blkif->irq = 0;
}
- if (blkif->blk_ring.sring) {
+ if (blkif->blk_rings.common.sring) {
unmap_frontend_page(blkif);
free_vm_area(blkif->blk_ring_area);
- blkif->blk_ring.sring = NULL;
+ blkif->blk_rings.common.sring = NULL;
}
}
struct xenbus_device *dev = be->dev;
unsigned long ring_ref;
unsigned int evtchn;
+ char protocol[64] = "";
int err;
DPRINTK("%s\n", dev->otherend);
return err;
}
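+ /* Default to the native ABI; the frontend may request another
+  * via its xenstore "protocol" node. */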
+ be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
+ err = xenbus_gather(XBT_NIL, dev->otherend, "protocol",
+ "%63s", protocol, NULL);
+ if (err)
+ strcpy(protocol, "unspecified, assuming native");
+ else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_NATIVE))
+ be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
+ else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_32))
+ be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_32;
+ else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_64))
+ be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_64;
+ else {
+ xenbus_dev_fatal(dev, -1, "unknown fe protocol %s", protocol);
+ return -1;
+ }
+ printk("blktap: ring-ref %ld, event-channel %d, protocol %d (%s)\n",
+ ring_ref, evtchn, be->blkif->blk_protocol, protocol);
+
/* Map the shared frame, irq etc. */
err = tap_blkif_map(be->blkif, ring_ref, evtchn);
if (err) {
--- /dev/null
+#ifndef __XEN_BLKIF_H__
+#define __XEN_BLKIF_H__
+
+#include <xen/interface/io/ring.h>
+#include <xen/interface/io/blkif.h>
+#include <xen/interface/io/protocols.h>
+
+/* Not a real protocol. Used to generate ring structs which contain
+ * the elements common to all protocols only. This way we get a
+ * compiler-checkable way to use common struct elements, so we can
+ * avoid using switch(protocol) in a number of places. */
+struct blkif_common_request {
+ char dummy;
+};
+struct blkif_common_response {
+ char dummy;
+};
+
+/* i386 protocol version */
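+/* The 32-bit x86 ABI aligns uint64_t on 4 bytes, so pack to 4-byte
+ * boundaries to reproduce the 32-bit guest's struct layout exactly. */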
+#pragma pack(push, 4)
+struct blkif_x86_32_request {
+ uint8_t operation; /* BLKIF_OP_??? */
+ uint8_t nr_segments; /* number of segments */
+ blkif_vdev_t handle; /* only for read/write requests */
+ uint64_t id; /* private guest value, echoed in resp */
+ blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
+ struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+};
+struct blkif_x86_32_response {
+ uint64_t id; /* copied from request */
+ uint8_t operation; /* copied from request */
+ int16_t status; /* BLKIF_RSP_??? */
+};
+typedef struct blkif_x86_32_request blkif_x86_32_request_t;
+typedef struct blkif_x86_32_response blkif_x86_32_response_t;
+#pragma pack(pop)
+
+/* x86_64 protocol version */
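+/* The 64-bit ABI aligns uint64_t on 8 bytes; the explicit aligned(8)
+ * attributes below preserve that layout even in a 32-bit backend build. */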
+struct blkif_x86_64_request {
+ uint8_t operation; /* BLKIF_OP_??? */
+ uint8_t nr_segments; /* number of segments */
+ blkif_vdev_t handle; /* only for read/write requests */
+ uint64_t __attribute__((__aligned__(8))) id;
+ blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
+ struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+};
+struct blkif_x86_64_response {
+ uint64_t __attribute__((__aligned__(8))) id;
+ uint8_t operation; /* copied from request */
+ int16_t status; /* BLKIF_RSP_??? */
+};
+typedef struct blkif_x86_64_request blkif_x86_64_request_t;
+typedef struct blkif_x86_64_response blkif_x86_64_response_t;
+
+DEFINE_RING_TYPES(blkif_common, struct blkif_common_request,
+                  struct blkif_common_response);
+DEFINE_RING_TYPES(blkif_x86_32, struct blkif_x86_32_request,
+                  struct blkif_x86_32_response);
+DEFINE_RING_TYPES(blkif_x86_64, struct blkif_x86_64_request,
+                  struct blkif_x86_64_response);
+
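+/* Every back-ring variant shares the same index bookkeeping (req_cons,
+ * rsp_prod_pvt, sring pointer); only the element layout differs, so the
+ * 'common' view is safe wherever only ring indexes are touched. */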
+union blkif_back_rings {
+ blkif_back_ring_t native;
+ blkif_common_back_ring_t common;
+ blkif_x86_32_back_ring_t x86_32;
+ blkif_x86_64_back_ring_t x86_64;
+};
+typedef union blkif_back_rings blkif_back_rings_t;
+
+enum blkif_protocol {
+ BLKIF_PROTOCOL_NATIVE = 1,
+ BLKIF_PROTOCOL_X86_32 = 2,
+ BLKIF_PROTOCOL_X86_64 = 3,
+};
+
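+/* Translate a 32-bit guest request into native layout, field by field. */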
+static inline void blkif_get_x86_32_req(blkif_request_t *dst,
+                                        blkif_x86_32_request_t *src)
+{
+ int i;
+ dst->operation = src->operation;
+ dst->nr_segments = src->nr_segments;
+ dst->handle = src->handle;
+ dst->id = src->id;
+ dst->sector_number = src->sector_number;
+ for (i = 0; i < src->nr_segments; i++)
+ dst->seg[i] = src->seg[i];
+}
+
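+/* Translate a 64-bit guest request into native layout, field by field. */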
+static inline void blkif_get_x86_64_req(blkif_request_t *dst,
+                                        blkif_x86_64_request_t *src)
+{
+ int i;
+ dst->operation = src->operation;
+ dst->nr_segments = src->nr_segments;
+ dst->handle = src->handle;
+ dst->id = src->id;
+ dst->sector_number = src->sector_number;
+ for (i = 0; i < src->nr_segments; i++)
+ dst->seg[i] = src->seg[i];
+}
+
+#endif /* __XEN_BLKIF_H__ */
*/
#define BLKIF_MAX_SEGMENTS_PER_REQUEST 11
+struct blkif_request_segment {
+ grant_ref_t gref; /* reference to I/O buffer frame */
+ /* @first_sect: first sector in frame to transfer (inclusive). */
+ /* @last_sect: last sector in frame to transfer (inclusive). */
+ uint8_t first_sect, last_sect;
+};
+
struct blkif_request {
uint8_t operation; /* BLKIF_OP_??? */
uint8_t nr_segments; /* number of segments */
blkif_vdev_t handle; /* only for read/write requests */
uint64_t id; /* private guest value, echoed in resp */
blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
- struct blkif_request_segment {
- grant_ref_t gref; /* reference to I/O buffer frame */
- /* @first_sect: first sector in frame to transfer (inclusive). */
- /* @last_sect: last sector in frame to transfer (inclusive). */
- uint8_t first_sect, last_sect;
- } seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+ struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
};
typedef struct blkif_request blkif_request_t;